# Steps:
# Import libraries and packages
import matplotlib as plt
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix , accuracy_score, classification_report
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import random
import pickle
import cv2
import os
import time # time1 = time.time(); print('Time taken: {:.1f} seconds'.format(time.time() - time1))
import warnings
from tqdm import tqdm_notebook as tqdm
import itertools
warnings.filterwarnings("ignore")
SEED = 42  # fixed random seed for reproducible shuffling/splits
# initialize the data and labels
print("[INFO] loading images...")
time1 = time.time()  # to measure time taken
data = []
labels = []
classes = ["Forest", "Buildings"]
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images('dataset')))  # data folder with 2 categorical folders
random.seed(SEED)
random.shuffle(imagePaths)
# progress bar
with tqdm(total=len(imagePaths)) as pbar:
    # loop over the input images
    for imagePath in imagePaths:
        # load the image; cv2.imread returns None for unreadable/corrupt
        # files, so skip those instead of crashing inside cv2.resize
        image = cv2.imread(imagePath)
        if image is None:
            pbar.update(1)
            continue
        # resize the image to be 32x32 pixels (ignoring aspect ratio) and
        # flatten the 32x32x3=3072 pixel image into a 1-D feature vector
        image = cv2.resize(image, (32, 32)).flatten()
        data.append(image)
        # extract the class label from the parent folder name in the path
        label = imagePath.split(os.path.sep)[-2]
        label = 1 if label == "Buildings" else 0
        labels.append(label)
        # update the progressbar
        pbar.update(1)
# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
print('Time taken: {:.1f} seconds'.format(time.time() - time1))  # to measure time taken
print("done")
[INFO] loading images...
Time taken: 4.1 seconds done
print("Total Images: ", len(data))  # report the dataset size after loading
Total Images: 883
# sample data for first image
print("sample image: {}".format(data[0]))  # flattened, [0,1]-scaled pixel vector
print("no of features/pixels values: {}".format(len(data[0]))) # 32x32x3=3072
print("label: {}".format(classes[labels[0]]))  # map the numeric label back to its class name
sample image: [0.31764706 0.29803922 0.28627451 ... 0.74117647 0.58823529 0.4627451 ] no of features/pixels values: 3072 label: Forest
# partition the data into 80% training and 20% validation
# fixed random_state makes the split reproducible across runs
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2, random_state=SEED)
trainX.shape
(706, 3072)
trainY.shape
(706,)
testX.shape
(177, 3072)
testY.shape
(177,)
trainX
array([[0.20392157, 0.57254902, 0.34509804, ..., 0.00784314, 0.17254902,
0.07843137],
[0.85490196, 0.87843137, 0.95686275, ..., 0. , 0. ,
0.01176471],
[0.76862745, 0.77254902, 0.78039216, ..., 0.44313725, 0.46666667,
0.48627451],
...,
[0.88235294, 0.87058824, 0.80392157, ..., 0.21960784, 0.19215686,
0.23137255],
[0.84313725, 0.78823529, 0.79215686, ..., 0.3372549 , 0.36862745,
0.62745098],
[0.02352941, 0.35686275, 0.17254902, ..., 0.09411765, 0.28627451,
0.21568627]])
trainY
array([0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1,
1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0,
1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0,
1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,
0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0,
0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1,
1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1,
0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0,
0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0,
0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1,
0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0,
1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0,
1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1,
1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0,
0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0,
0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1,
1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1,
0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0,
1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1,
1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,
1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0,
0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1,
1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0,
1, 0])
type(trainY)
numpy.ndarray
testY
array([1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,
0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1,
0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1,
0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0,
1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1,
1])
# convert the integer labels to one-hot vectors for the 2-unit softmax output
trainY = to_categorical(trainY, num_classes=2)  # 0 -> [1,0], 1 -> [0,1]
testY = to_categorical(testY, num_classes=2)  # same encoding applied to the test labels
# [0,1] Buildings
# [1,0] Forest
# testY
trainY
array([[1., 0.],
[0., 1.],
[0., 1.],
...,
[1., 0.],
[0., 1.],
[1., 0.]], dtype=float32)
# rescale one flattened training sample back to 0-255 ints and view it as 32x32x3
sample_image = (trainX[5] * 255).astype("int")
plt.imshow(sample_image.reshape(32,32,3))  # NOTE(review): channels are BGR from cv2.imread — colors may appear swapped; confirm
<matplotlib.image.AxesImage at 0x20983fdc848>
trainY[5] # [0,1] means buildings [1,0] means forest
array([0., 1.], dtype=float32)
# define the 3072-1024-512-2 architecture using Keras
# (input 3072 = 32x32x3 flattened pixels; output 2 = one softmax unit per class)
model = Sequential()
# first hidden layer has 1024 nodes
model.add(Dense(units=1024, input_shape=(3072,), kernel_initializer='uniform', activation="relu"))
# second hidden layer has 512 nodes
model.add(Dense(units=512, kernel_initializer='uniform', activation="relu"))
# output layer with one unit per possible class label
model.add(Dense(units=2, kernel_initializer='uniform', activation="softmax"))
# initialize our initial learning rate and # of epochs to train for
INIT_LR = 0.01
EPOCHS = 50
# compile the model using SGD as our optimizer and binary cross-entropy loss
# (matches the original 2-class, one-hot + softmax setup)
print("[INFO] compiling network...")
# 'lr' is a deprecated alias that newer TF/Keras releases removed; use learning_rate
opt = SGD(learning_rate=INIT_LR)  # Stochastic Gradient Descent (SGD) optimizer
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
[INFO] compiling network network...
model.summary()
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_6 (Dense) (None, 1024) 3146752 _________________________________________________________________ dense_7 (Dense) (None, 512) 524800 _________________________________________________________________ dense_8 (Dense) (None, 2) 1026 ================================================================= Total params: 3,672,578 Trainable params: 3,672,578 Non-trainable params: 0 _________________________________________________________________
# train the neural network on training data set
# batch_size (32) controls the size of each group of data to pass through the network.
time1 = time.time() # to measure time taken
# H.history records per-epoch loss/accuracy used by the plots below
H = model.fit(trainX, trainY, validation_data=(testX, testY), epochs=EPOCHS, batch_size=32)
print('Time taken: {:.1f} seconds'.format(time.time() - time1)) # to measure time taken
Train on 706 samples, validate on 177 samples Epoch 1/50 706/706 [==============================] - 4s 6ms/sample - loss: 0.6417 - accuracy: 0.5595 - val_loss: 0.5998 - val_accuracy: 0.7627 Epoch 2/50 706/706 [==============================] - 1s 2ms/sample - loss: 0.5690 - accuracy: 0.7011 - val_loss: 0.5348 - val_accuracy: 0.6949 Epoch 3/50 706/706 [==============================] - 1s 1ms/sample - loss: 0.5121 - accuracy: 0.7691 - val_loss: 0.4719 - val_accuracy: 0.8023 Epoch 4/50 706/706 [==============================] - 1s 1ms/sample - loss: 0.4770 - accuracy: 0.7975 - val_loss: 0.4480 - val_accuracy: 0.7684 Epoch 5/50 706/706 [==============================] - 1s 967us/sample - loss: 0.4409 - accuracy: 0.8286 - val_loss: 0.4041 - val_accuracy: 0.8249 Epoch 6/50 706/706 [==============================] - 1s 904us/sample - loss: 0.4233 - accuracy: 0.8244 - val_loss: 0.3978 - val_accuracy: 0.8644 Epoch 7/50 706/706 [==============================] - 1s 830us/sample - loss: 0.3959 - accuracy: 0.8314 - val_loss: 0.5498 - val_accuracy: 0.7119 Epoch 8/50 706/706 [==============================] - 0s 681us/sample - loss: 0.3828 - accuracy: 0.8470 - val_loss: 0.3717 - val_accuracy: 0.8249 Epoch 9/50 706/706 [==============================] - 0s 701us/sample - loss: 0.3551 - accuracy: 0.8569 - val_loss: 0.5454 - val_accuracy: 0.7345 Epoch 10/50 706/706 [==============================] - 1s 729us/sample - loss: 0.3716 - accuracy: 0.8584 - val_loss: 0.6455 - val_accuracy: 0.6667 Epoch 11/50 706/706 [==============================] - 1s 717us/sample - loss: 0.3501 - accuracy: 0.8541 - val_loss: 0.4008 - val_accuracy: 0.7966 Epoch 12/50 706/706 [==============================] - 1s 717us/sample - loss: 0.3228 - accuracy: 0.8810 - val_loss: 1.4973 - val_accuracy: 0.4746 Epoch 13/50 706/706 [==============================] - 0s 681us/sample - loss: 0.3753 - accuracy: 0.8612 - val_loss: 3.8260 - val_accuracy: 0.4633 Epoch 14/50 706/706 [==============================] - 1s 
719us/sample - loss: 0.5073 - accuracy: 0.8442 - val_loss: 0.6052 - val_accuracy: 0.7288 Epoch 15/50 706/706 [==============================] - 0s 697us/sample - loss: 0.3178 - accuracy: 0.8867 - val_loss: 0.3326 - val_accuracy: 0.8701 Epoch 16/50 706/706 [==============================] - 1s 782us/sample - loss: 0.2839 - accuracy: 0.8980 - val_loss: 0.6133 - val_accuracy: 0.6836 Epoch 17/50 706/706 [==============================] - 0s 692us/sample - loss: 0.2806 - accuracy: 0.9051 - val_loss: 0.3339 - val_accuracy: 0.8701 Epoch 18/50 706/706 [==============================] - 0s 688us/sample - loss: 0.2674 - accuracy: 0.9008 - val_loss: 0.3636 - val_accuracy: 0.8079 Epoch 19/50 706/706 [==============================] - 0s 686us/sample - loss: 0.2766 - accuracy: 0.8994 - val_loss: 0.3475 - val_accuracy: 0.8757 Epoch 20/50 706/706 [==============================] - 0s 695us/sample - loss: 0.2569 - accuracy: 0.9093 - val_loss: 0.6117 - val_accuracy: 0.7571 Epoch 21/50 706/706 [==============================] - 0s 695us/sample - loss: 0.2637 - accuracy: 0.9008 - val_loss: 0.3109 - val_accuracy: 0.8814 Epoch 22/50 706/706 [==============================] - 0s 678us/sample - loss: 0.2367 - accuracy: 0.9193 - val_loss: 1.3067 - val_accuracy: 0.5367 Epoch 23/50 706/706 [==============================] - 0s 687us/sample - loss: 0.3124 - accuracy: 0.8697 - val_loss: 0.3499 - val_accuracy: 0.8249 Epoch 24/50 706/706 [==============================] - 1s 718us/sample - loss: 0.2196 - accuracy: 0.9249 - val_loss: 2.2409 - val_accuracy: 0.4689 Epoch 25/50 706/706 [==============================] - 0s 696us/sample - loss: 0.3324 - accuracy: 0.8881 - val_loss: 0.3368 - val_accuracy: 0.8870 Epoch 26/50 706/706 [==============================] - 1s 720us/sample - loss: 0.2042 - accuracy: 0.9334 - val_loss: 0.6058 - val_accuracy: 0.7232 Epoch 27/50 706/706 [==============================] - 1s 731us/sample - loss: 0.2215 - accuracy: 0.9306 - val_loss: 0.3265 - val_accuracy: 0.8701 
Epoch 28/50 706/706 [==============================] - 0s 691us/sample - loss: 0.2041 - accuracy: 0.9419 - val_loss: 0.3082 - val_accuracy: 0.8757 Epoch 29/50 706/706 [==============================] - 1s 715us/sample - loss: 0.1852 - accuracy: 0.9448 - val_loss: 0.3061 - val_accuracy: 0.8870 Epoch 30/50 706/706 [==============================] - 0s 687us/sample - loss: 0.1898 - accuracy: 0.9221 - val_loss: 2.7578 - val_accuracy: 0.4689 Epoch 31/50 706/706 [==============================] - 0s 687us/sample - loss: 0.3253 - accuracy: 0.9263 - val_loss: 0.3118 - val_accuracy: 0.8757 Epoch 32/50 706/706 [==============================] - 0s 694us/sample - loss: 0.1822 - accuracy: 0.9448 - val_loss: 0.3960 - val_accuracy: 0.8192 Epoch 33/50 706/706 [==============================] - 1s 731us/sample - loss: 0.1628 - accuracy: 0.9533 - val_loss: 0.4246 - val_accuracy: 0.8136 Epoch 34/50 706/706 [==============================] - 1s 709us/sample - loss: 0.1585 - accuracy: 0.9575 - val_loss: 0.6257 - val_accuracy: 0.7740 Epoch 35/50 706/706 [==============================] - 0s 684us/sample - loss: 0.1794 - accuracy: 0.9448 - val_loss: 0.3286 - val_accuracy: 0.8531 Epoch 36/50 706/706 [==============================] - 1s 711us/sample - loss: 0.1442 - accuracy: 0.9575 - val_loss: 0.3119 - val_accuracy: 0.8757 Epoch 37/50 706/706 [==============================] - ETA: 0s - loss: 0.1626 - accuracy: 0.94 - 1s 720us/sample - loss: 0.1624 - accuracy: 0.9462 - val_loss: 0.4237 - val_accuracy: 0.8192 Epoch 38/50 706/706 [==============================] - 0s 694us/sample - loss: 0.1803 - accuracy: 0.9419 - val_loss: 0.3390 - val_accuracy: 0.8588 Epoch 39/50 706/706 [==============================] - 1s 718us/sample - loss: 0.1592 - accuracy: 0.9533 - val_loss: 0.3110 - val_accuracy: 0.8701 Epoch 40/50 706/706 [==============================] - 1s 725us/sample - loss: 0.1439 - accuracy: 0.9575 - val_loss: 2.1636 - val_accuracy: 0.4859 Epoch 41/50 706/706 
[==============================] - 0s 689us/sample - loss: 0.3070 - accuracy: 0.9150 - val_loss: 0.6471 - val_accuracy: 0.7062 Epoch 42/50 706/706 [==============================] - 1s 722us/sample - loss: 0.1559 - accuracy: 0.9490 - val_loss: 0.3192 - val_accuracy: 0.8644 Epoch 43/50 706/706 [==============================] - ETA: 0s - loss: 0.1356 - accuracy: 0.95 - 0s 707us/sample - loss: 0.1355 - accuracy: 0.9589 - val_loss: 0.3794 - val_accuracy: 0.8475 Epoch 44/50 706/706 [==============================] - 0s 701us/sample - loss: 0.1078 - accuracy: 0.9773 - val_loss: 0.3398 - val_accuracy: 0.8531 Epoch 45/50 706/706 [==============================] - 1s 710us/sample - loss: 0.0976 - accuracy: 0.9773 - val_loss: 0.4097 - val_accuracy: 0.8136 Epoch 46/50 706/706 [==============================] - 0s 695us/sample - loss: 0.1035 - accuracy: 0.9688 - val_loss: 0.3865 - val_accuracy: 0.8531 Epoch 47/50 706/706 [==============================] - 0s 692us/sample - loss: 0.1096 - accuracy: 0.9759 - val_loss: 0.3528 - val_accuracy: 0.8644 Epoch 48/50 706/706 [==============================] - 0s 695us/sample - loss: 0.1742 - accuracy: 0.9348 - val_loss: 0.6773 - val_accuracy: 0.7062 Epoch 49/50 706/706 [==============================] - 1s 718us/sample - loss: 0.1779 - accuracy: 0.9320 - val_loss: 0.3281 - val_accuracy: 0.8588 Epoch 50/50 706/706 [==============================] - 1s 715us/sample - loss: 0.1032 - accuracy: 0.9731 - val_loss: 0.3352 - val_accuracy: 0.8701 Time taken: 30.7 seconds
# evaluate the network
print("[INFO] evaluating network...")
# pred_prob holds one [p_forest, p_buildings] softmax row per test sample
pred_prob = model.predict(testX, batch_size=32)
[INFO] evaluating network...
pred_prob
array([[8.22680164e-03, 9.91773188e-01],
[9.92109716e-01, 7.89027102e-03],
[1.49924518e-03, 9.98500824e-01],
[9.98867989e-01, 1.13203132e-03],
[7.10417449e-01, 2.89582521e-01],
[9.99868751e-01, 1.31283057e-04],
[3.09927575e-03, 9.96900797e-01],
[7.50794411e-01, 2.49205530e-01],
[9.88341630e-01, 1.16583332e-02],
[9.99987364e-01, 1.26612622e-05],
[3.64055276e-01, 6.35944724e-01],
[3.57580096e-01, 6.42419934e-01],
[5.32260118e-03, 9.94677424e-01],
[9.10902143e-01, 8.90978873e-02],
[4.24960941e-01, 5.75039029e-01],
[7.46427536e-01, 2.53572464e-01],
[5.60478456e-02, 9.43952143e-01],
[1.49227865e-03, 9.98507679e-01],
[9.99678373e-01, 3.21697764e-04],
[3.78565537e-03, 9.96214330e-01],
[8.85982588e-02, 9.11401749e-01],
[8.10537875e-01, 1.89462140e-01],
[9.47059512e-01, 5.29404916e-02],
[7.94092251e-04, 9.99205887e-01],
[3.92415328e-04, 9.99607623e-01],
[4.60840523e-01, 5.39159417e-01],
[8.10282171e-01, 1.89717829e-01],
[5.65594964e-05, 9.99943495e-01],
[9.94706690e-01, 5.29330131e-03],
[1.49302825e-01, 8.50697160e-01],
[9.98939216e-01, 1.06075709e-03],
[1.18207440e-01, 8.81792545e-01],
[9.99605358e-01, 3.94723524e-04],
[9.98689711e-01, 1.31028180e-03],
[9.97493267e-01, 2.50677811e-03],
[1.86170146e-01, 8.13829839e-01],
[8.12156618e-01, 1.87843442e-01],
[9.99580562e-01, 4.19443124e-04],
[2.12027753e-05, 9.99978781e-01],
[4.44541173e-03, 9.95554626e-01],
[1.38924001e-02, 9.86107588e-01],
[4.71197814e-03, 9.95288014e-01],
[8.93632233e-01, 1.06367767e-01],
[4.96293709e-04, 9.99503732e-01],
[8.63971949e-01, 1.36028066e-01],
[7.00010955e-01, 2.99988985e-01],
[4.61579502e-01, 5.38420558e-01],
[5.00908121e-03, 9.94990885e-01],
[1.28500527e-02, 9.87149954e-01],
[4.67683515e-03, 9.95323241e-01],
[1.60690199e-03, 9.98393118e-01],
[9.77903068e-01, 2.20969152e-02],
[2.00972576e-02, 9.79902685e-01],
[9.03849006e-01, 9.61510688e-02],
[1.11019509e-02, 9.88898098e-01],
[9.99579012e-01, 4.20947181e-04],
[3.22590859e-05, 9.99967694e-01],
[4.16209400e-01, 5.83790600e-01],
[9.66137528e-01, 3.38624232e-02],
[1.13492377e-01, 8.86507571e-01],
[1.23546552e-03, 9.98764515e-01],
[9.99706447e-01, 2.93479126e-04],
[3.15525047e-02, 9.68447506e-01],
[9.96653140e-01, 3.34690651e-03],
[4.67423163e-02, 9.53257740e-01],
[3.55994552e-01, 6.44005477e-01],
[8.31737876e-01, 1.68262139e-01],
[9.15437222e-01, 8.45628083e-02],
[1.22495871e-02, 9.87750471e-01],
[1.67863637e-01, 8.32136393e-01],
[9.99824226e-01, 1.75773384e-04],
[6.50562525e-01, 3.49437535e-01],
[6.45966709e-01, 3.54033262e-01],
[7.42983865e-03, 9.92570102e-01],
[5.39253592e-01, 4.60746408e-01],
[4.75872634e-03, 9.95241284e-01],
[9.66254592e-01, 3.37453373e-02],
[9.98145700e-01, 1.85430155e-03],
[2.22978857e-03, 9.97770190e-01],
[1.21830765e-03, 9.98781741e-01],
[3.00616562e-01, 6.99383378e-01],
[3.61245841e-01, 6.38754189e-01],
[9.53667238e-03, 9.90463316e-01],
[9.99958754e-01, 4.11914516e-05],
[9.49808061e-01, 5.01919687e-02],
[9.91457641e-01, 8.54235142e-03],
[1.03012499e-04, 9.99897003e-01],
[3.90260015e-03, 9.96097386e-01],
[4.76507008e-01, 5.23492992e-01],
[4.23778454e-03, 9.95762289e-01],
[9.13971543e-01, 8.60284194e-02],
[4.14473965e-04, 9.99585569e-01],
[2.86633178e-04, 9.99713361e-01],
[9.95680451e-01, 4.31958260e-03],
[1.26415845e-02, 9.87358391e-01],
[3.46146256e-01, 6.53853714e-01],
[5.06817596e-03, 9.94931757e-01],
[3.74473512e-01, 6.25526488e-01],
[4.11600049e-04, 9.99588430e-01],
[8.69933188e-01, 1.30066812e-01],
[9.98499036e-01, 1.50094926e-03],
[3.96417856e-01, 6.03582203e-01],
[3.73716699e-04, 9.99626279e-01],
[9.99845505e-01, 1.54502675e-04],
[9.56819177e-01, 4.31808196e-02],
[8.61557841e-04, 9.99138355e-01],
[9.66016576e-03, 9.90339816e-01],
[1.64503045e-03, 9.98354971e-01],
[9.95344579e-01, 4.65542404e-03],
[5.60401706e-03, 9.94395971e-01],
[1.03209890e-01, 8.96790147e-01],
[9.99283016e-01, 7.17019837e-04],
[9.97187793e-01, 2.81214854e-03],
[9.25555170e-01, 7.44448453e-02],
[9.80833828e-01, 1.91661939e-02],
[1.26700010e-02, 9.87329960e-01],
[9.33269322e-01, 6.67306706e-02],
[1.09328679e-03, 9.98906732e-01],
[4.47672866e-02, 9.55232680e-01],
[5.62788174e-03, 9.94372129e-01],
[5.39364177e-04, 9.99460638e-01],
[7.29317486e-01, 2.70682514e-01],
[9.72978119e-03, 9.90270257e-01],
[4.37840559e-02, 9.56215918e-01],
[1.75527515e-04, 9.99824464e-01],
[1.19387573e-02, 9.88061190e-01],
[9.88312542e-01, 1.16874790e-02],
[4.83068943e-01, 5.16931117e-01],
[3.05803405e-04, 9.99694228e-01],
[8.74313116e-02, 9.12568688e-01],
[1.60289593e-02, 9.83971059e-01],
[3.59755242e-03, 9.96402502e-01],
[2.21058607e-01, 7.78941333e-01],
[6.11908035e-04, 9.99388099e-01],
[9.99963522e-01, 3.65088163e-05],
[9.96183932e-01, 3.81604885e-03],
[8.90171900e-02, 9.10982847e-01],
[8.74581575e-01, 1.25418365e-01],
[6.00683428e-02, 9.39931631e-01],
[2.22722563e-04, 9.99777257e-01],
[1.31478667e-01, 8.68521333e-01],
[4.25976008e-01, 5.74024022e-01],
[7.67136991e-01, 2.32862934e-01],
[2.41487869e-03, 9.97585058e-01],
[5.10742841e-03, 9.94892597e-01],
[2.31771037e-01, 7.68228889e-01],
[1.06432140e-02, 9.89356816e-01],
[1.69028819e-03, 9.98309731e-01],
[1.13396352e-04, 9.99886632e-01],
[9.55781564e-02, 9.04421866e-01],
[9.74522948e-01, 2.54769716e-02],
[9.99999881e-01, 1.58658551e-07],
[9.99570310e-01, 4.29741543e-04],
[9.86209998e-05, 9.99901414e-01],
[3.13521875e-03, 9.96864736e-01],
[7.41915643e-01, 2.58084387e-01],
[3.38307843e-02, 9.66169238e-01],
[8.02426483e-04, 9.99197543e-01],
[7.36622587e-02, 9.26337719e-01],
[2.19720975e-01, 7.80279040e-01],
[1.01718842e-03, 9.98982847e-01],
[7.46940494e-01, 2.53059536e-01],
[9.99402285e-01, 5.97695005e-04],
[5.77630043e-01, 4.22369987e-01],
[4.01285999e-02, 9.59871411e-01],
[2.72157195e-04, 9.99727786e-01],
[7.76625216e-01, 2.23374784e-01],
[1.29279227e-03, 9.98707175e-01],
[9.85466322e-05, 9.99901414e-01],
[9.99994278e-01, 5.68358928e-06],
[7.52489865e-01, 2.47510120e-01],
[2.23995686e-01, 7.76004314e-01],
[1.08887956e-01, 8.91112030e-01],
[9.64785159e-01, 3.52148302e-02],
[3.39251757e-01, 6.60748303e-01],
[4.36058675e-04, 9.99563873e-01],
[4.94781928e-03, 9.95052159e-01]], dtype=float32)
9.25910056e-01
0.925910056
9.6146774e-01
0.96146774
# recover integer class ids from the one-hot labels / softmax probabilities
# Note: buildings -> 1 and forest -> 0
test_y = list(np.argmax(testY, axis=1))
pred_y = list(np.argmax(pred_prob, axis=1))
# test_y
pred_y
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1]
def plot_confusion_metrix(y_true, y_pred, classes,
                          normalize=False,
                          title='Confusion Matrix',
                          cmap=plt.cm.Blues):
    """
    Objective
    ----------
    Plot the confusion matrix, then print the classification report
    and accuracy score for the given predictions.
    parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.
    classes : list
        List of labels to index the matrix
    normalize : bool, optional
        If True, show per-row (true-class) fractions instead of raw counts.
    title : title for matrix
    cmap : colormap for matrix
    returns
    ----------
    None; draws the matrix and prints the accuracy metrics.
    """
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        # divide each row by its total so cells read as recall-style fractions
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized Confusion Matrix")
    else:
        print("Confusion Matrix, Without Normalisation")
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=35)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # BUG FIX: iterate rows x columns (shape[0] x shape[1]); the original used
    # shape[0] twice, which only worked because confusion matrices are square
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 # dark cells get white text, light cells get black text
                 color='white' if cm[i, j] > thresh else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
    print("-----------------------------------------------------")
    print('Classification report')
    print(classification_report(y_true, y_pred))
    print("-----------------------------------------------------")
    acc = accuracy_score(y_true, y_pred)
    print("Accuracy of the model: ", acc)
plot_confusion_metrix(test_y, pred_y,classes=["Forest: 0","Buildings: 1"])  # report metrics on the held-out test split
Confusion Matrix, Without Normalisation
-----------------------------------------------------
Classification report
precision recall f1-score support
0 0.93 0.78 0.85 82
1 0.83 0.95 0.89 95
accuracy 0.87 177
macro avg 0.88 0.86 0.87 177
weighted avg 0.88 0.87 0.87 177
-----------------------------------------------------
Accuracy of the model: 0.8700564971751412
# plot the training and validation loss curves recorded by model.fit
epoch_axis = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure(figsize=[10, 8])
for history_key, curve_label in (("loss", "train_loss"), ("val_loss", "val_loss")):
    plt.plot(epoch_axis, H.history[history_key], label=curve_label)
plt.title("ANN: Training & Validation Loss")
plt.xlabel("Epoch #", weight="bold")
plt.ylabel("Loss", weight="bold")
plt.legend()
plt.show()
# plot the training and validation accuracy curves recorded by model.fit
epoch_axis = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure(figsize=[10, 8])
for history_key, curve_label in (("accuracy", "train_acc"), ("val_accuracy", "val_acc")):
    plt.plot(epoch_axis, H.history[history_key], label=curve_label)
plt.title("ANN: Training and Validation Accuracy")
plt.xlabel("Epoch #", weight="bold")
plt.ylabel("Accuracy", weight="bold")
plt.legend()
plt.show()
# accuracy = 88%
model.summary()
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_6 (Dense) (None, 1024) 3146752 _________________________________________________________________ dense_7 (Dense) (None, 512) 524800 _________________________________________________________________ dense_8 (Dense) (None, 2) 1026 ================================================================= Total params: 3,672,578 Trainable params: 3,672,578 Non-trainable params: 0 _________________________________________________________________
# save the model and label binarizer to disk
print("[INFO] serializing network and label binarizer...")
model.save('model_ANN.h5')  # HDF5 file stores architecture, weights and optimizer state
[INFO] serializing network and label binarizer...
# import the necessary packages
from tensorflow.keras.models import load_model
import pickle
import cv2
import imutils
import matplotlib.pyplot as plt
%matplotlib inline
# load the model
print("[INFO] loading network and...")
model = load_model("model_ANN.h5")  # restores the trained ANN saved above
[INFO] loading network and...
def display_img(img):
    """Show *img* in a large (12x10 inch) matplotlib figure."""
    figure = plt.figure(figsize=(12,10))
    plt.grid(b=None)
    axes = figure.add_subplot(111)
    axes.imshow(img)
# load the input image and resize it to the target spatial dimensions
width = 32
height = 32
# grab the image paths from the unlabeled test-example folder
testImagePaths = sorted(list(paths.list_images('test_examples'))) # test data folder with random images
# progress bar
with tqdm(total=len(testImagePaths)) as pbar:
    for imagePath in testImagePaths:
        # cv2.imread returns None for unreadable files; skip those
        # instead of crashing in .copy()/cv2.resize
        image = cv2.imread(imagePath)
        if image is None:
            pbar.update(1)
            continue
        output = image.copy()
        image = cv2.resize(image, (width, height))
        # scale the pixel values to [0, 1] to match the training preprocessing
        image = image.astype("float") / 255.0
        # for a simple fully-connected network, flatten the image
        image = image.flatten()
        image = image.reshape((1, image.shape[0]))
        # make a prediction on the image
        preds = model.predict(image)
        # find the class label index with the largest corresponding probability
        i = preds.argmax(axis=1)[0]
        label = classes[i]
        label = "{}: {:.2f}%".format(label, preds[0][i] * 100)
        # draw the predicted label on an enlarged copy of the original image
        output = imutils.resize(output, width=400)
        cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 255, 0), 2)
        # convert img to rgb format and display in notebook
        img = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        display_img(img)
        pbar.update(1)
!pip install gradio
import gradio as gr
def predict_image(image):
    """Classify an input image as Forest vs Buildings with the trained ANN.

    Returns a dict mapping each class name to its rounded softmax probability.
    """
    # apply the same preprocessing used at training time
    resized = cv2.resize(image, (32, 32))
    # scale the pixel values to [0, 1]
    scaled = resized.astype("float") / 255.0
    # flatten to a single 3072-feature row for the fully-connected network
    flat = scaled.flatten()
    batch = flat.reshape((1, flat.shape[0]))
    # make a prediction on the image
    probabilities = model.predict(batch).flatten()
    result = {
        "Forest": round(float(probabilities[0]), 3),
        "Buildings": round(float(probabilities[1]), 3),
    }
    print(result)
    return result
# wire the predictor into a simple web demo
im = gr.inputs.Image(shape=(32,32))  # NOTE(review): gr.inputs/gr.outputs are legacy Gradio APIs — confirm installed version supports them
label = gr.outputs.Label(num_top_classes=2)
# share=True exposes a temporary public URL for the demo
gr.Interface(fn=predict_image, inputs=im, outputs=label, capture_session=True, title="ANN Demo").launch(share=True)
Running locally at: http://127.0.0.1:7876/ This share link will expire in 24 hours. If you need a permanent link, visit: https://gradio.app/introducing-hosted (NEW!) Running on External URL: https://28525.gradio.app Interface loading below...
(<Flask 'gradio.networking'>, 'http://127.0.0.1:7876/', 'https://28525.gradio.app')
{'Forest': 0.948, 'Buildings': 0.052}
{'Forest': 0.001, 'Buildings': 0.999}
{'Forest': 0.002, 'Buildings': 0.998}